%load_ext autoreload
%autoreload 2
from skyfield.api import load
import numpy as np
import math
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from skyfield.api import utc
from scipy.optimize import brentq # root finding (Brent's method)
from datetime import timedelta, datetime
import pytz
from definitions import *
# Map a 2-bit binary pair onto a classical-element glyph (air, fire, water, earth).
from_binary_to_element_symbols = {
    '11': '🜁',
    '01': '🜂',
    '10': '🜄',
    '00': '🜃',
}
# Map a 2-bit binary pair onto an element index (kept as strings, -1..2).
from_binary_to_element_ix = {
    '11': '-1',
    '01': '0',
    '10': '1',
    '00': '2',
}


def b_to_q(x):
    """Split a 6-char binary string into three 2-bit pairs, mapped to element glyphs."""
    return [from_binary_to_element_symbols[x[i*2:i*2+2]] for i in [0, 1, 2]]


def b_to_q_el(x):
    """Split a 6-char binary string into three 2-bit pairs, mapped to element indices."""
    return [from_binary_to_element_ix[x[i*2:i*2+2]] for i in [0, 1, 2]]


# quick smoke checks
print ( b_to_q('000000'))
print ( b_to_q_el('101010'))
# per-hexagram element-index triples for the I Ching (`iching` comes from `definitions`)
iching_ix = [b_to_q_el(str(x['binary']))for x in iching]
# per-hexagram element-symbol triples (NOTE: despite the name, these are glyphs, not binary)
iching_binary = [b_to_q(str(x['binary']))for x in iching]
# important parameters
design_offset = 88 * math.pi / 180 # required to determine design parameters
earth_offset = math.pi # earth offset (180 degrees against the sun)
pi2 = math.pi*2 # circle constant
# Important parameters and statistics for mandala wheel calculation
# successive subdivisions of the 64-hexagram wheel: 6 lines, 6 colors, 6 tones, 5 bases
hex_width = pi2/64
line_width = hex_width/6
color_width = line_width/6
tone_width = color_width/6
base_width = tone_width/5
# Credits to Ra Uru Hu for receiving this info
# lookup table: sequential wheel position -> King Wen hexagram number
iching_map = [36, 22, 63, 37, 55, 30, 49, 13, 19, 41, 60, 61, 54, 38, 58, 10, 11, 26, 5, 9, 34, 14, 43, 1, 44, 28, 50, 32, 57, 48, 18, 46, 6, 47, 64, 40, 59, 29, 4, 7, 33, 31, 56, 62, 53, 39, 52, 15, 12, 45, 35, 16, 20, 8, 23, 2, 24, 27, 3, 42, 51, 21, 17, 25]
iching_map.reverse()
# how precise decimals are shown in table
pd.set_option('display.float_format', '{:.10f}'.format)
# load Ephemeris
# load library planet position file (requires the JPL DE431 kernel 'de431t.bsp' on disk)
planets = load('de431t.bsp')
# named handles into the ephemeris; outer bodies use their barycenters
_planets = {
    "sun": planets['SUN'],
    "earth": planets['EARTH'],
    "moon": planets['MOON'],
    "mercury": planets['MERCURY'],
    "venus": planets['VENUS BARYCENTER'],
    "mars": planets['MARS BARYCENTER'],
    "jupiter": planets['JUPITER BARYCENTER'],
    "saturn": planets['SATURN BARYCENTER'],
    "uranus": planets['URANUS BARYCENTER'],
    "neptune": planets['NEPTUNE BARYCENTER'],
    "pluto": planets['PLUTO BARYCENTER'],
}
# builtin timescale: no leap-second file download needed
ts = load.timescale(builtin=True)
# For info purposes — example of reading lat / lon / distance for one body:
# lat = _planets['earth'].at(t_time_array).observe(_planets['venus']).ecliptic_latlon()[0].radians - math.pi
# lon = _planets['earth'].at(t_time_array).observe(_planets['venus']).ecliptic_latlon()[1].radians - math.pi
# d = _planets['earth'].at(t_time_array).observe(_planets['venus']).ecliptic_latlon()[2]
def generate_planets(time_range):
    """Build a DataFrame of ecliptic longitudes (radians) for every body in
    `_planets`, as observed from Earth at each instant in `time_range`.

    Parameters
    ----------
    time_range : skyfield Time array; its `.tt` (Julian TT) fills column 'time'.

    Returns
    -------
    pd.DataFrame with a 'time' column plus one longitude column per body.
    """
    df = pd.DataFrame()
    df['time'] = np.array(time_range.tt)
    # Hoisted: the Earth observation point is the same for every body.
    earth_at = _planets['earth'].at(time_range)
    for planet_name, p in _planets.items():
        # Observing Earth from Earth is degenerate; the +pi offset flips the
        # (near-zero) vector so 'earth' lands opposite the sun on the wheel.
        offset = math.pi if planet_name == "earth" else 0.0
        # ecliptic_latlon() returns (lat, lon, distance); index [1] is LONGITUDE
        # (the original comment said latitude, which was wrong).
        df[planet_name] = earth_at.observe(p).ecliptic_latlon()[1].radians + offset
    return df
# NOTE: despite the name this is 10 years of hours (87,600), not 100 years
hours_in_100_years = 24 * 365 * 10
t_time_array = ts.utc(2010, 1, 1, range(0,hours_in_100_years), 0) # hourly steps starting 2010-01-01
df_planets = generate_planets(t_time_array) # takes a while
t_time_array[0]
df_planets.tail()
len(df_planets)
import math
math.pi
# for one rad, return array of elements
# for one row, return array of elements
# for one
df_planets.apply(lambda x: x/(math.pi**2))
import seaborn as sns
# quick-look rescalers for plotting
normalize_squared = lambda x: x/(math.pi**2)
normalize = lambda x: x/(math.pi)
# df_planets.apply(lambda x: x/(math.pi**2))
# overwrite the degenerate 'earth' column: earth sits opposite the sun
df_planets['earth'] = df_planets['sun'] - math.pi
sns.lineplot(x='time', y='earth', data=df_planets[:2000])
sns.lineplot(x='time', y='sun', data=df_planets.apply(normalize)[:2000])
sns.lineplot(x='time', y='sun', data=df_planets.apply(normalize_squared)[:2000])
def neutron_stream_pos(planet_position):
    """Return the mandala-wheel position (float in [0, 64)) for a planet
    longitude given in radians, after applying the wheel calibration offset."""
    calibration = (planet_position + (2*line_width - 1*color_width - 2*tone_width))
    wheel_turns = calibration / (2*math.pi)
    return (wheel_turns * 64) % 64
def map_on_hexagram(df):
    """Map mandala-wheel positions (floats in [0, 64)) onto King Wen hexagram numbers.

    df: DataFrame of neutron-stream positions, one column per planet.
    Returns a numpy array, same shape as df, of hexagram numbers (1-64).
    NOTE: line/color/tone are computed but not returned — see the commented return.
    """
    # convert dataframe to numpy array
    neutron_stream = df.to_numpy()
    hexagram_bin = np.floor(neutron_stream) # floor -> sequential wheel bin 0..63
    # map bin number onto 'hexagram' (neutron stream is sequential order, hexagram is King Wen Sequence)
    strong = np.array(iching_map)
    flat = hexagram_bin.astype(int).flatten()
    previous_shape = neutron_stream.shape
    mapped = strong[flat]  # fancy-index lookup: wheel bin -> King Wen number
    hexagram = mapped.reshape(previous_shape)
    hexagram_fraction = neutron_stream - hexagram_bin  # fractional position inside the hexagram
    line = hexagram_fraction // (1/6) + 1 # count in which 6th this neutrino stream falls in
    line_fraction = (hexagram_fraction - (line - 1)*1/6 ) / (1/6)
    color = line_fraction // (1/6) + 1  # 6 colors per line
    color_fraction = (line_fraction - (color -1) * 1/6) / (1/6)
    tone = color_fraction // (1/6) + 1  # 6 tones per color
    return hexagram #, line.astype(int), color.astype(int), tone.astype(int)
# map 'neutron' stream, aka influences of the probability field (the planets in the solar system physical space)
# NOTE(review): iloc[:, 1:6] selects 5 planet columns by position — verify against _planets insertion order
df_angles = neutron_stream_pos(df_planets.iloc[:, 1:6])
z = map_on_hexagram(df_angles)
z
many_2_b = np.array(iching_binary) # lookup table: hexagram index -> element glyphs
many_2 = np.array(iching_ix) # lookup table: hexagram index -> element indices
one_2 = z.astype(int).flatten() - 1 # flattened, 0-based hexagram indices
many_2_b[63]
# binary el
el_b = many_2_b[one_2]
# normal el (0 -> 3)
el = many_2[one_2]
el_b
el.shape
z.shape
# 87600 rows (hours), 5 planets, 3 element digits per hexagram
el.reshape((87600, 5, 3))
finish = el.reshape((87600, 15))
finish
df_finish = pd.DataFrame(finish)
df_finish.head(20)
import seaborn as sns
uniform_data = np.random.rand(10, 12)  # unused demo data
data = finish.astype(int)
data[1:5].shape
# heatmaps over progressively longer windows
ax = sns.heatmap(data[1:10])
ax = sns.heatmap(data[1:50])
ax = sns.heatmap(data[1:100])
ax = sns.heatmap(data[1:500])
ax = sns.heatmap(data[1:3000])
Let's add extra time to see how this progresses.
ax = sns.heatmap(data[3000:6000])
ax = sns.heatmap(data[6000:9000])
ax = sns.heatmap(data[9000:12000])
ax = sns.heatmap(data[12000:15000])
ax = sns.heatmap(data[0:10000])
ax = sns.heatmap(data[0:80000])
# element colors taken from the tab10 palette
water = sns.color_palette("tab10")[0]
air = sns.color_palette("tab10")[1]
earth = sns.color_palette("tab10")[2]
fire = sns.color_palette("tab10")[3]
# order matters: element index values map onto this palette in the heatmaps
color_map = [air, fire, water, earth]
fig, ax = plt.subplots(figsize=(10,50))
sns.heatmap(data[0:10000], ax=ax, cmap=color_map, cbar=False)
data.transpose()
one_year = 24 * 365  # hours per (non-leap) year
# one heatmap per consecutive year, planet channels on the y-axis
fig, ax = plt.subplots(figsize=(50,5))
sns.heatmap(data[one_year*0:one_year*1].transpose(), ax=ax, cmap=color_map, cbar=False)
fig, ax = plt.subplots(figsize=(50,5))
sns.heatmap(data[one_year*1:one_year*2].transpose(), ax=ax, cmap=color_map, cbar=False)
fig, ax = plt.subplots(figsize=(50,5))
sns.heatmap(data[one_year*2:one_year*3].transpose(), ax=ax, cmap=color_map, cbar=False)
fig, ax = plt.subplots(figsize=(50,5))
sns.heatmap(data[one_year*3:one_year*4].transpose(), ax=ax, cmap=color_map, cbar=False)
There's something wrong with the currently presented mapping: it shows neither fire-fire-fire (63) nor water-water-water (64). This causes a lot of randomness, which is best dealt with from the start, before any further iteration occurs.
data_fixed = finish.astype(int)
# fixed hex colors per element (replaces the tab10 picks above)
water = "#0E61B0"
air = "#C29F17"
earth = "#55A349"
fire = "#C9280C"
color_map = [air, fire, water, earth]
fig, ax = plt.subplots(figsize=(50,5))
sns.heatmap(data_fixed[one_year*0:one_year*1].transpose(), ax=ax, cmap=color_map, cbar=False)
## add a few planets
# NOTE: get_elemental_map is defined further down in this file (notebook ran out of order)
data_tmp = get_elemental_map(df_planets.loc[:,['earth','moon','mercury','venus','sun', 'mars', 'jupiter','saturn', 'uranus','neptune']])
fig, ax = plt.subplots(figsize=(50,10))
sns.heatmap(data_tmp[one_year*0:one_year*1].transpose(), ax=ax, cmap=color_map, cbar=False)
hours_in_desired_timespan = 24 * 365 * 2 # 2 years in hours
t_time_array = ts.utc(2019, 1, 1, range(0,hours_in_desired_timespan), 0) # hourly steps starting 2019-01-01
df_planets_2019_2020 = generate_planets(t_time_array) # takes a while
r = ['earth','moon','mercury','venus','sun', 'mars', 'jupiter','saturn', 'uranus','neptune']
r.reverse()
data_tmp = get_elemental_map(df_planets_2019_2020.loc[:,r])
fig, ax = plt.subplots(figsize=(50,15))
sns.heatmap(data_tmp.transpose(), ax=ax, cmap=color_map, cbar=False)
print (t_time_array[0].utc_datetime())
print (t_time_array[hours_in_desired_timespan-1].utc_datetime())
import pandas as pd
import numpy as np
# hourly BTC/USDT candles from Binance (local CSV)
df_c = pd.read_csv('Binance_BTCUSDT_1h.csv', parse_dates=True)
df_c['date'].dtype
df_c['date'] = pd.to_datetime(df_c['unix'], unit='ms')
df_money = df_c.set_index('date')
df_money['close'].plot()
len(df_money)
df_money
hours_in_trading_code = len(df_money) # number of hourly bars to cover
t_time_array = ts.utc(2020, 8, 1, range(0,hours_in_trading_code), 0) # hourly steps; start hard-coded to match the data window
df_crypto_2_months = generate_planets(t_time_array) # takes a while
r = ['earth','moon','mercury','venus','sun', 'mars', 'jupiter','saturn', 'uranus','neptune']
r.reverse()
data_tmp = get_elemental_map(df_crypto_2_months.loc[:,r])
fig, ax = plt.subplots(figsize=(15,15))
sns.heatmap(data_tmp.transpose(), ax=ax, cmap=color_map, cbar=False)
fig, ax = plt.subplots(figsize=(15,15))
df_money['close'].plot()
# align the elemental map with the price index
df_solar = pd.DataFrame(data_tmp)
df_solar.index = df_money.index
#df_money.loc[['close']].join(df_solar)
df_dataset = pd.concat([df_money[['close']], df_solar], axis=1)
df_dataset.head()
from sklearn import svm
len(df_dataset)
train_size = 0.8  # NOTE: defined but unused — the split below is hard-coded at 3000 rows
df_x = df_dataset[df_dataset.columns[1:]]
df_y = df_dataset['close']
train_x = df_x[:3000]
train_y = df_y[:3000]
test_x = df_x[3000:]
test_y = df_y[3000:]
from sklearn import svm
X = train_x.to_numpy()
y = train_y.to_numpy()
regr = svm.SVR(kernel='poly', C=100, gamma=0.1, epsilon=.1)
regr.fit(X, y)
# out-of-sample predictions vs actuals
results = pd.DataFrame()
results['test_x'] = regr.predict(test_x)
results['test_y'] = list(test_y)
results.plot()
results.plot()
# in-sample (training) fit for comparison
results = pd.DataFrame()
results['test_x'] = regr.predict(train_x)
results['test_y'] = list(train_y)
results.plot()
# NOTE: df_hourly is only defined further down — notebook cells ran out of order
df_hourly.iloc[0].name.year
df_solar.index
def get_crypto_planet_data(size):
    """Load the last `size` minute-bars of bitstamp BTC/USD data, downsample
    to hourly bars, compute the matching planetary elemental map, and return
    the combined [Close, elemental columns] DataFrame.
    """
    # NOTE(review): expects the bitstamp CSV next to the notebook — confirm path
    df_c = pd.read_csv('bitstampUSD_1-min_data_2012-01-01_to_2020-09-14.csv', parse_dates=True)
    # make data timestamp
    df_c['date'] = pd.to_datetime(df_c['Timestamp'], unit='s')
    # cast down to hourly data (group minute rows by their 'YYYY-mm-dd HH' bucket)
    groupkey = pd.to_datetime(df_c[-size:].date.dt.strftime('%Y-%m-%d %H'))
    df_hourly = df_c[-size:].groupby(groupkey).agg({'Close':'last','Volume_(BTC)':'sum'})
    df_hourly.head()
    first_date = df_hourly.iloc[0].name
    print ( first_date )
    # generate ephemeris elements, starting at the first bar's hour
    h = first_date.hour
    hours_in_trading_code = len(df_hourly) # number of hourly bars to cover
    t_time_array = ts.utc(first_date.year, first_date.month, first_date.day, range(h,h+hours_in_trading_code), 0) # hourly increments
    # generate ephemeris for time period
    df_crypto_planets = generate_planets(t_time_array) # can take a while
    # selected desired planets for attribution
    r = ['earth','moon','mercury','venus','sun', 'mars', 'jupiter','saturn', 'uranus','neptune']
    r.reverse()
    # create elemental data map
    data_tmp = get_elemental_map(df_crypto_planets.loc[:,r])
    # plot data map
    fig, ax = plt.subplots(figsize=(5,5))
    sns.heatmap(data_tmp.transpose(), ax=ax, cmap=color_map, cbar=False)
    # create the training dataset [Close, Solar System Time]
    df_solar = pd.DataFrame(data_tmp)
    df_solar.index = df_hourly.index
    #df_money.loc[['close']].join(df_solar)
    df_dataset = pd.concat([df_hourly[['Close']], df_solar], axis=1)
    return df_dataset
get_data = get_crypto_planet_data(1000000)
len(get_data)
# re-do the loading inline (same steps as inside get_crypto_planet_data)
df_c = pd.read_csv('bitstampUSD_1-min_data_2012-01-01_to_2020-09-14.csv', parse_dates=True)
# make data timestamp
df_c['date'] = pd.to_datetime(df_c['Timestamp'], unit='s')
len(df_c[-50000:])
# cast down to hourly data
groupkey = pd.to_datetime(df_c[-500000:].date.dt.strftime('%Y-%m-%d %H'))
df_hourly = df_c[-500000:].groupby(groupkey).agg({'Close':'last','Volume_(BTC)':'sum'})
df_hourly.head()
len(df_hourly)
df_hourly.plot()
df_hourly.iloc[0]
# generate ephemeris elements
hours_in_trading_code = len(df_hourly) # number of hourly bars to cover
t_time_array = ts.utc(2019, 10, 2, range(0,hours_in_trading_code), 0) # hourly steps; start hard-coded to match the data window
df_crypto_2_months = generate_planets(t_time_array) # takes a while
r = ['earth','moon','mercury','venus','sun', 'mars', 'jupiter','saturn', 'uranus','neptune']
r.reverse()
data_tmp = get_elemental_map(df_crypto_2_months.loc[:,r])
fig, ax = plt.subplots(figsize=(5,5))
sns.heatmap(data_tmp.transpose(), ax=ax, cmap=color_map, cbar=False)
# create the training dataset
df_solar = pd.DataFrame(data_tmp)
df_solar.index = df_hourly.index
#df_money.loc[['close']].join(df_solar)
df_dataset = pd.concat([df_hourly[['Close']], df_solar], axis=1)
df_dataset.head()
df_dataset
df_dataset.Close.dtype
from sklearn import svm
# prepare dataset for training ('Close' is column 0 here, so columns[1:] is correct)
df_x = df_dataset[df_dataset.columns[1:]]
df_x = df_x[df_dataset['Close'].isnull() != True]
df_y = df_dataset['Close'].dropna()
# we train on this
train_x = df_x[:6000]
train_y = df_y[:6000]
# we test on this (validate)
test_x = df_x[6000:]
test_y = df_y[6000:]
test_x.shape
df_y[df_y.isnull() == True]
X = train_x.to_numpy()
y = train_y.to_numpy()
regr = svm.SVR(kernel='linear', C=100, gamma=0.1, epsilon=.1)
regr.fit(X, y)
results = pd.DataFrame()
results['test_x'] = regr.predict(test_x)
results['test_y'] = list(test_y)
results.plot()
# adjust for start
# train the model
X = train_x.to_numpy()
y = train_y.to_numpy()
regr = svm.SVR(kernel='rbf', C=100, gamma=0.001, epsilon=.1)
regr.fit(X, y)
# see results
results = pd.DataFrame()
results['test_x'] = regr.predict(test_x)
results['test_y'] = list(test_y)
results.plot()
# train the model
X = train_x.to_numpy()
y = train_y.to_numpy()
regr = svm.SVR(kernel='rbf', C=1000, gamma=0.001, epsilon=.1)
regr.fit(X, y)
# see results
results = pd.DataFrame()
# BUG FIX: this column was named '' but is read back as 'test_x' below,
# which raised a KeyError in the start-offset adjustment.
results['test_x'] = regr.predict(test_x)
results['test_y'] = list(test_y)
results.plot()
# shift predictions so both series start at the same value (visual comparison)
start_offset = results['test_y'][0] - results['test_x'][0]
results['test_x'] += start_offset
results.plot()
get_data = get_crypto_planet_data(1000000)
len(get_data)
# prepare dataset for training ('Close' is column 0, so columns[1:] is correct)
df_x = get_data[get_data.columns[1:]]
df_x = df_x[get_data['Close'].isnull() != True]
df_y = get_data['Close'].dropna()
# we train on this
train_x = df_x[:10000].to_numpy()
train_y = df_y[:10000].to_numpy()
# we test on this (validate)
test_x = df_x[10000:]
test_y = df_y[10000:]
# train model (sweep over kernels/hyper-parameters below)
regr = svm.SVR(kernel='linear', C=100, gamma=0.1, epsilon=.1)
regr.fit(train_x, train_y)
# see results
results = pd.DataFrame()
results['test_x'] = regr.predict(test_x)
results['test_y'] = list(test_y)
results.plot()
# train the model
regr = svm.SVR(kernel='rbf', C=100, gamma=0.001, epsilon=.1)
regr.fit(train_x, train_y)
# see results
results = pd.DataFrame()
results['test_x'] = regr.predict(test_x)
results['test_y'] = list(test_y)
results.plot()
# compare first differences (hour-over-hour changes) instead of levels
results = pd.DataFrame()
results['test_x'] = np.diff(regr.predict(test_x))
results['test_y'] = np.diff(list(test_y))
results.plot()
# train the model
regr = svm.SVR(kernel='rbf', C=1000, gamma=0.001, epsilon=.01)
regr.fit(train_x, train_y)
# see results
results = pd.DataFrame()
results['test_x'] = regr.predict(test_x)
results['test_y'] = list(test_y)
results.plot()
# train the model
regr = svm.SVR(kernel='poly', C=100, gamma=0.01, epsilon=.1)
regr.fit(train_x, train_y)
# see results
results = pd.DataFrame()
results['test_x'] = regr.predict(test_x)
results['test_y'] = list(test_y)
results.plot()
# train the model
regr = svm.SVR(kernel='sigmoid', C=100, gamma=0.001, epsilon=.0001)
regr.fit(train_x, train_y)
# see results
results = pd.DataFrame()
results['test_x'] = regr.predict(test_x)
results['test_y'] = list(test_y)
results.plot()
get_data = get_crypto_planet_data(1000000)
len(get_data)
# rename elemental columns with a _pl suffix so the dummy columns get readable names
get_data.columns = ['Close']+ [str(x)+'_pl' for x in get_data.iloc[:,1:].columns]
get_data.columns
# cast element columns to str so get_dummies one-hot encodes them
get_data.iloc[:,1:] = get_data.iloc[:,1:].astype(str)
get_data_flat = pd.get_dummies(get_data.iloc[:,1:])
get_data_flat['Close'] = get_data.Close
# prepare dataset for training
# BUG FIX: 'Close' is the LAST column of get_data_flat here (it was appended
# after the dummies), so the old `columns[1:]` slice leaked the target into
# the feature matrix (and dropped a real feature) — which trivially inflates
# the test metrics below. Select every column except 'Close' instead.
df_x = get_data_flat[[c for c in get_data_flat.columns if c != 'Close']]
df_x = df_x[get_data_flat['Close'].isnull() != True]
df_y = get_data_flat['Close'].dropna()
# we train on this
train_x = df_x[:10000].to_numpy()
train_y = df_y[:10000].to_numpy()
# we test on this (validate)
test_x = df_x[10000:]
test_y = df_y[10000:]
# train the model
regr = svm.SVR(kernel='rbf', C=100, gamma=0.001, epsilon=.1)
regr.fit(train_x, train_y)
# see results
results = pd.DataFrame()
results['test_x'] = regr.predict(test_x)
results['test_y'] = list(test_y)
results.plot()
from sklearn.metrics import explained_variance_score, mean_squared_error, r2_score
print ("Explained Variance:",explained_variance_score(results['test_y'].to_numpy(), results['test_x'].to_numpy()))
print ("MSE:", mean_squared_error(results['test_y'].to_numpy(), results['test_x'].to_numpy()))
print ("R2",r2_score(results['test_y'].to_numpy(), results['test_x'].to_numpy()))
Remarkably, the algorithm almost 1:1 predicted the whole of 2020 based on 2019 data. We're literally seeing 0.8866 of the variance explained... by planet positions?
A result this good warrants checking the feature matrix for target leakage before celebrating.
# train the model (poly-kernel variant on the one-hot features)
regr = svm.SVR(kernel='poly', C=100, gamma=0.001, epsilon=.01)
regr.fit(train_x, train_y)
# see results
results = pd.DataFrame()
results['test_x'] = regr.predict(test_x)
results['test_y'] = list(test_y)
results.plot()
import matplotlib
# NOTE: switching to the non-interactive Agg backend after pyplot was already
# imported above; needed so the animation renders off-screen
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.animation as ani
import numpy as np
import pandas as pd
from celluloid import Camera
fig = plt.figure()
camera = Camera(fig) # https://pypi.org/project/celluloid/
# ax1 = plt.subplot2grid ((2, 2), (0, 0), colspan=2, rowspan=2)
# slide a 2000-row window through `data` (defined far above), one frame per step
for i in range(30):
    #data = np.random.normal(0, 10, i+1)
    #pd.DataFrame(data).plot(kind='bar', ax=ax1)
    ax = sns.heatmap(data[i*50:2000+i*50], cbar=False)
    # ax._legend().remove()
    camera.snap()
# Set up formatting for the movie files (requires ffmpeg on PATH)
Writer = ani.writers['ffmpeg']
writer = Writer(fps=30, metadata=dict(artist='Me'), bitrate=1800)
# anim = ani.ArtistAnimation(fig, ims, interval=500, repeat_delay=3000, blit=True)
anim = camera.animate(interval=50000, repeat_delay=5000)
anim.save('textmovie.mp4', writer=writer)
def get_elemental_map(df_planets):
    """Convert per-planet ecliptic longitudes into a per-row elemental map.

    Each planet column expands into 3 integer element digits (one per trigram
    pair), so the result has shape (n_rows, n_planets * 3).

    Parameters: df_planets — DataFrame with one longitude column per planet,
    including a 'sun' column (used to derive 'earth').
    Returns: int numpy array of element indices.
    """
    # BUG FIX: work on a copy — the original mutated the caller's DataFrame
    # in place when overwriting the 'earth' column.
    df_planets = df_planets.copy()
    # earth sits opposite the sun on the wheel
    df_planets['earth'] = df_planets['sun'] - math.pi
    # position to index on a wheel
    df_angles = neutron_stream_pos(df_planets)
    # index on a wheel to specific binary I-Ching Sequence - King Wen Version
    z = map_on_hexagram(df_angles)
    many_2_b = np.array(iching_binary)  # lookup: hexagram index -> element glyphs
    many_2 = np.array(iching_ix)  # lookup: hexagram index -> element indices
    one_2 = z.astype(int).flatten() - 1  # 0-based hexagram indices, flattened
    # binary el (glyphs; unused below, kept for parity with the exploration cells)
    el_b = many_2_b[one_2]
    # normal el (0 -> 3)
    el = many_2[one_2]
    finish = el.reshape((df_angles.shape[0], df_angles.shape[1]*3))
    return finish.astype(int)
# sanity-check the elemental map over the 10-year planet table
temp = get_elemental_map(df_planets.loc[:,['earth','moon','mercury','venus','sun', 'mars', 'jupiter','saturn', 'uranus','neptune']])
temp
ax = sns.heatmap(temp[1:500])
ax = sns.heatmap(temp[1:3000])
ax = sns.heatmap(temp[1:30000])
df_planets.columns
We want to convert the Julian time into a series of incremental bucket values.
# NOTE(review): presumably `df` here is a planets frame with a 'time' column
# (generate_planets output); the `df` built at the bottom of this file has 't'
# instead — confirm which cell produced it
t = df.time[0]
t
def test_get_buckets(j_tt, mod_period):
for index, rotor in enumerate(range(-10,9)):
print ( math.floor(j_tt % (mod_period**rotor) / (mod_period**rotor) * mod_period) , '%', (mod_period**rotor))
def value_to_rotor(j_tt, mod_period, n_rotor_range_min=-15, n_rotor_range_max=15):
    """Decompose a value into rotary bins, one bin per power of `mod_period`.

    mod_period=2 gives a binary-like arrangement, mod_period=10 a decimal-like
    one. The rotor range [n_rotor_range_min, n_rotor_range_max) sets how many
    bins are produced.
    """
    def bin_at(power):
        scale = mod_period ** power
        return math.floor(j_tt % scale / scale * mod_period)

    return [bin_at(power) for power in range(n_rotor_range_min, n_rotor_range_max)]
def rotor_to_value(rotor, mod_period, n_rotor_range_min=-15, n_rotor_range_max=15):
    """Convert rotary bins (see `value_to_rotor`) back to a scalar value.

    Each bin at rotor power r contributes rotor[i] * mod_period**(r-1); the
    (r+1)/**2 form below is algebraically the same but preserves the original
    floating-point evaluation order.
    """
    julian_time = 0.0
    # NOTE: removed a leftover debug print that dumped the rotor range on every call
    for index, r in enumerate(range(n_rotor_range_min, n_rotor_range_max)):
        julian_time += rotor[index] * (mod_period**(r+1)) / (mod_period**2)
    return julian_time
def encode_value_to_one_hot_rotor(value, mod_period, n_rotor_range_min=-15, n_rotor_range_max=15):
    """Encode `value` as a flat 1-hot array: one mod_period-wide group per rotor bin."""
    rotor_bins = value_to_rotor(value, mod_period, n_rotor_range_min, n_rotor_range_max)
    identity = np.eye(mod_period)  # row k is the 1-hot vector for bin value k
    return identity[rotor_bins].flatten()
def decode_one_hot_rotor_to_value(flat_1hot_rotor, mod_period, n_rotor_range_min=-15, n_rotor_range_max=15):
    """Inverse of `encode_value_to_one_hot_rotor`: flat 1-hot array -> scalar value."""
    # regroup the flat array into (n_rotors, mod_period) 1-hot rows
    n_groups = int(len(flat_1hot_rotor) / mod_period)
    onehot = np.reshape(flat_1hot_rotor, (n_groups, mod_period))
    # 1-hot rows back to bin indices
    rotor = np.argmax(onehot, axis=1)
    return rotor_to_value(rotor, mod_period, n_rotor_range_min, n_rotor_range_max)
# small little test for the bucketing system
print (df.time[0])
test_get_buckets(df.time[0], 10) # test
# below snippet is how to create categorical variables
values = [1, 0, 3]
categories = np.eye(4)[values] # 4 categories
categories
# back to category array
np.argmax(categories, axis=1)
# further testing: round-trip a julian time through the rotor encoding
r = value_to_rotor(df.time[0], 4, -15, 15)
print ("Julian time:", df.time[0])
print("to")
print ("Rotor encoded:",r)
_t = rotor_to_value(r, 4, -15, 15)
print("Rotor encoded:",r)
print("to")
print("Julian Time from rotor", _t)
def test_encode_decode(value, mod_rotor, n_rotor_range_min, n_rotor_range_max):
    """Round-trip `value` through the 1-hot rotor encoding and print the error."""
    encoded = encode_value_to_one_hot_rotor(value, mod_rotor, n_rotor_range_min, n_rotor_range_max)
    decoded = decode_one_hot_rotor_to_value(encoded, mod_rotor, n_rotor_range_min, n_rotor_range_max)
    print ("Julian time:", value)
    print ("Flat One Hot Array:", encoded, " with length:", len(encoded))
    print ("Decoded Rotor:", decoded)
    print ("Error:", decoded - value)


test_encode_decode(df.time[1000], 4, -15, 15)
We'll be adding all these arrays to a dataframe, then transposing it to get the dummy values.
# this might take a time... It creates a dataframe from a series of values (like a column)
def value_array_to_rotor_df(value_array, prefix="_", mod_rotor = 4, n_rotor_range_min=-15, n_rotor_range_max=15):
    """One-hot-rotor-encode every value in `value_array` into a DataFrame.

    Returns a frame with mod_rotor * (n_rotor_range_max - n_rotor_range_min)
    columns, each column name prefixed with `prefix`.
    """
    # preallocate the (n_values, n_onehot_columns) result matrix
    encode = np.zeros((len(value_array),mod_rotor*(n_rotor_range_max-n_rotor_range_min)))
    # do it across our time_range (currently in use)
    # NOTE: removed an unused `time_rotors = []` local left over from an earlier draft
    for ii, t in enumerate(value_array):
        encode[ii,:] = np.array(encode_value_to_one_hot_rotor(t, mod_rotor, n_rotor_range_min, n_rotor_range_max))
    df_rotor = pd.DataFrame(encode)
    df_rotor = df_rotor.add_prefix(prefix)
    return df_rotor
df_time_rotor = value_array_to_rotor_df(t_time_array.tt, "time_rot_", 4, -15, 15)
df_time_rotor.head()
len(df_time_rotor)
# NOTE: `encode` is local to value_array_to_rotor_df — this leaked from a
# notebook cell and would raise NameError in a plain script
encode.shape
Now add the time_rot_ columns to df_planets, which contains our ephemeris. This essentially represents time as a series of buckets.
# attach the time-rotor one-hot columns to the ephemeris frame (row-aligned merge)
df_planets_with_trotor = df_planets.merge(df_time_rotor, left_index=True, right_index=True)
df_planets_with_trotor.head()
We quickly check if the values are correctly encoded:
# Testing of time rotor values
filter_col = [col for col in df_planets_with_trotor if col.startswith('time_rot_')]
print ("Julian Time:",df_planets_with_trotor.loc[5500,'time'])
rot = np.array(df_planets_with_trotor.loc[5500,filter_col])
print ("Rotor:", rot)
decode_one_hot_rotor_to_value(rot, 4, -15, 15)
# Jupiter's longitude range in degrees (sanity check)
print (df_planets_with_trotor.jupiter.min() * 180.0 / math.pi )
print (df_planets_with_trotor.jupiter.max() * 180.0 / math.pi )
First, let's determine the optimal rotor count (range).
print ("Our maximum difference between 2 values is is maximum this value", df_planets_with_trotor.sun.diff().max())
print ("So let's not add useless precision to the onehot encoding")
# round-trip the extreme values to confirm the chosen rotor range loses nothing
val = df_planets_with_trotor.sun.max()
test_encode_decode(val, 4, -10, 3) # optimal parameters for 0 error
val = df_planets_with_trotor.sun.min()
test_encode_decode(val, 4, -10, 3) # optimal parameters for 0 error
Above errors are small enough to proceed with this encoding.
# NOTE: this shadows the skyfield ephemeris `planets` loaded above with a plain list
planets = ['moon', 'mercury', 'venus', 'sun', 'mars', 'jupiter', 'saturn', 'uranus', 'neptune', 'pluto']
for planet_name in planets:
    print ("Encoding:", planet_name)
    # calculate rotor for planet
    df_planet_rotor = \
        value_array_to_rotor_df(df_planets_with_trotor[planet_name], planet_name+"_rot_", 4, -10, 3)
    # add planet to `df_planets_with_trotor`
    df_planets_with_trotor = df_planets_with_trotor.merge(df_planet_rotor, left_index=True, right_index=True)
# drop the derived 'earth' column before export
df_planets_with_trotor.drop('earth', axis=1, inplace=True)
df_planets_with_trotor.head()
# NOTE: filename says 100 years but the table above covers 10 years of hours
df_planets_with_trotor.to_csv("100_year_rotor_encoded_ephemeris.csv", sep=';', index=False)
len (df_planets_with_trotor)
When more planets are active, the loss algorithm should take an argmax across the "planet rotors", so that the neural network learns the "sphere grouping" across outputs (520 when taking all spheres into account).
We train until the precision is small enough — say E <= 0.00001 should be adequate.
Then we validate the data on the other 50 years, and we're eager to see the results.
def planet1_looks_at_planet2(p1, p2, t):
    """Apparent astrometric position of body p2 as seen from body p1 at time t."""
    observation = p1.at(t).observe(p2)
    return observation.apparent()
def observe(p1, p2, t):
    """Ecliptic longitude of p2 as observed from p1 at time t.

    Use `.degrees` or `.radians` on the returned angle.
    """
    # get_ts comes from `definitions` (star import) — presumably converts a
    # datetime into a skyfield Time object; TODO confirm
    t = get_ts(t)
    astrometric = planet1_looks_at_planet2(p1, p2, t)
    # ecliptic_latlon() returns (lat, lon, distance); only longitude is kept
    _, lon, _ = astrometric.ecliptic_latlon()
    return lon
def getEarthPos(t, planet):
    """Ecliptic longitude (radians) of `planet` as observed from Earth at time t."""
    longitude = observe(_planets['earth'], planet, t)
    return longitude.radians
# NOTE(review): a year-1 datetime — looks like a leftover experiment; unused below
t = datetime(1, 12, 9, 0, 14, 0, tzinfo=utc)
t
t = datetime(1970, 12, 9, 0, 14, 0, tzinfo=utc)
earth = _planets['earth']
jupiter = _planets['jupiter']
observe(earth, jupiter, t).radians
# 1970-2020 range sampled every 2 hours
start = datetime(1970, 1, 1, 0, 0, 0, tzinfo=utc)
end = datetime(2020, 4, 12, 0, 0, 0, tzinfo=utc)
dates = pd.date_range(start = start, end = end, tz=pytz.UTC, freq='2H').to_pydatetime().tolist()
# reason for 2H frequency: we want to divide the day up in 12 segments, which is a nice amount
len(dates)
import math
bodies = ['sun', 'earth', 'moon', 'venus', 'mars', 'jupiter']
# position function per body, all ecliptic longitudes as seen from earth;
# 'earth' is derived as the sun's longitude minus pi (the opposite point)
positions = {
    'sun': (lambda t: observe(earth, _planets['sun'], t).radians),
    'earth': (lambda t: observe(earth, _planets['sun'], t).radians - math.pi),
    'moon': (lambda t: observe(earth, _planets['moon'], t).radians),
    'venus': (lambda t: observe(earth, _planets['venus'], t).radians),
    'mars': (lambda t: observe(earth, _planets['mars'], t).radians),
    'jupiter': (lambda t: observe(earth, _planets['jupiter'], t).radians)
}
def generate_table(dates):
    """Build a DataFrame with a 't' column and one ecliptic-longitude column
    per body in `bodies`, evaluated at every date in `dates`."""
    table = {}
    table['t'] = dates
    for body in bodies:
        # evaluate this body's position function at every requested date
        table[body] = [positions[body](stamp) for stamp in dates]
    return pd.DataFrame(data=table)
# build the 1970-2020 position table (2-hour steps) and export it
df = generate_table(dates)
df.sample(15)
df.to_csv('planets.csv', sep=';')